From: George Dunlap
Date: Thu, 4 Feb 2016 10:41:07 +0000 (+0100)
Subject: x86/mm: clean up pfec handling in gva_to_gfn
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~1806
X-Git-Url: https://dgit.raspbian.org/%22http://www.example.com/cgi/%22/%22http:/www.example.com/cgi/%22?a=commitdiff_plain;h=e3e4ae8bb8314462496c2b065cfe4e8bc5205d5a;p=xen.git

x86/mm: clean up pfec handling in gva_to_gfn

At the moment, the pfec argument to gva_to_gfn has two functions:

* To inform guest_walk what kind of access is happening
* As a value to pass back into the guest in the event of a fault.

Unfortunately this is not quite treated consistently: the hvm_fetch_*
functions will "pre-clear" the PFEC_insn_fetch flag before calling
gva_to_gfn, meaning guest_walk doesn't actually know whether a given
access is an instruction fetch or not.

This works now, but will cause issues when pkeys are introduced,
since guest_walk will need to know whether an access is an
instruction fetch even if it doesn't return PFEC_insn_fetch.

Fix this by making a clean separation between the "in" and "out"
roles of the pfec argument:

1. Always pass in the access type to gva_to_gfn

2. Filter out inappropriate access flags before returning from
   gva_to_gfn.

(The PFEC_insn_fetch flag should only be passed to the guest if either
NX or SMEP is enabled.  See Intel 64 Developer's Manual, Volume 3,
Chapter Paging, PAGE-FAULT EXCEPTIONS.)

A minimal standalone sketch of this calling convention is included
after the patch below.

Signed-off-by: George Dunlap
Signed-off-by: Huaitong Han
Acked-by: Jan Beulich
Acked-by: Tim Deegan
---

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index dfeecca3bc..35ec6c95e7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4487,11 +4487,9 @@ enum hvm_copy_result hvm_copy_from_guest_virt(
 enum hvm_copy_result hvm_fetch_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
-        pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | PFEC_insn_fetch | pfec);
 }
 
 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
@@ -4513,11 +4511,9 @@ enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
-        pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | PFEC_insn_fetch | pfec);
 }
 
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index 11c1b35266..8aa7e0fe46 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -82,7 +82,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     if ( !top_page )
     {
         pfec[0] &= ~PFEC_page_present;
-        return INVALID_GFN;
+        goto out_tweak_pfec;
     }
 
     top_mfn = _mfn(page_to_mfn(top_page));
@@ -136,6 +136,14 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     if ( missing & _PAGE_SHARED )
         pfec[0] = PFEC_page_shared;
 
+ out_tweak_pfec:
+    /*
+     * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
+     * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
+     */
+    if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
+        pfec[0] &= ~PFEC_insn_fetch;
+
     return INVALID_GFN;
 }
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 162c06fba4..d42597c0e0 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3669,6 +3669,12 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
         pfec[0] &= ~PFEC_page_present;
         if ( missing & _PAGE_INVALID_BITS )
             pfec[0] |= PFEC_reserved_bit;
+        /*
+         * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
+         * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
+         */
+        if ( is_hvm_vcpu(v) && !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
+            pfec[0] &= ~PFEC_insn_fetch;
         return INVALID_GFN;
     }
     gfn = guest_walk_to_gfn(&gw);
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 9a8653dddb..195fe8f21f 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -255,7 +255,11 @@ static inline bool_t paging_invlpg(struct vcpu *v, unsigned long va)
  * tables don't map this address for this kind of access.
  * pfec[0] is used to determine which kind of access this is when
  * walking the tables. The caller should set the PFEC_page_present bit
- * in pfec[0]; in the failure case, that bit will be cleared if appropriate. */
+ * in pfec[0]; in the failure case, that bit will be cleared if appropriate.
+ *
+ * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
+ * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
+ */
 unsigned long paging_gva_to_gfn(struct vcpu *v,
                                 unsigned long va,
                                 uint32_t *pfec);
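
The sketch referenced in the message above follows. It is not Xen code:
fake_gva_to_gfn, the PFEC_* values, and the nx/smep booleans are simplified
stand-ins used only to illustrate the convention the patch establishes. The
caller always passes the true access type in, and the translation routine
strips PFEC_insn_fetch from the value reported back when neither NX nor SMEP
is enabled.

/* Illustrative sketch only -- not Xen code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFEC_page_present  (1u << 0)
#define PFEC_insn_fetch    (1u << 4)
#define INVALID_GFN        (~0ul)

static bool nx_enabled   = false;   /* assumption: NX disabled   */
static bool smep_enabled = false;   /* assumption: SMEP disabled */

/* Stand-in for gva_to_gfn: the walk always sees the real access type
 * in *pfec (step 1); inappropriate flags are stripped only on the
 * failure path, just before the value is handed back (step 2). */
static unsigned long fake_gva_to_gfn(unsigned long va, uint32_t *pfec)
{
    bool is_fetch = *pfec & PFEC_insn_fetch;

    printf("walk sees %s access to %#lx\n",
           is_fetch ? "an instruction-fetch" : "a data", va);

    /* Pretend the walk failed: build the error code for the guest. */
    *pfec &= ~PFEC_page_present;
    if ( !nx_enabled && !smep_enabled )
        *pfec &= ~PFEC_insn_fetch;   /* never report this flag to the guest */
    return INVALID_GFN;
}

int main(void)
{
    /* An instruction-fetch caller now sets the flag unconditionally. */
    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;

    if ( fake_gva_to_gfn(0x1000, &pfec) == INVALID_GFN )
        printf("fault, pfec reported to guest = %#x\n", pfec);
    return 0;
}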